#----------------------------Reproducible----------------------------------------------------------------------------------------
# Pin every RNG (Python hash seed, NumPy, stdlib random, TF graph seed) and force
# single-threaded TF op scheduling so repeated runs produce identical results.
import numpy as np
import tensorflow as tf
import random as rn
import os
seed=0
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
rn.seed(seed)
#session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# Single-threaded session: multi-threaded op scheduling is a source of nondeterminism.
session_conf =tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
#tf.set_random_seed(seed)
tf.compat.v1.set_random_seed(seed)
#sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
# Hand the seeded, single-threaded session to Keras so all layers/initializers use it.
K.set_session(sess)
#----------------------------Reproducible----------------------------------------------------------------------------------------
# Silence TF C++ logging (3 = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#--------------------------------------------------------------------------------------------------------------------------------
from keras.models import Model
from keras.layers import Dense, Input, Flatten, Activation, Dropout, Layer
from keras.layers.normalization import BatchNormalization
from keras.utils import to_categorical
from keras import optimizers,initializers,constraints,regularizers
from keras import backend as K
from keras.callbacks import LambdaCallback,ModelCheckpoint
from keras.utils import plot_model
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import svm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
import h5py
import math
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
%matplotlib inline
matplotlib.style.use('ggplot')
import os
from skimage import io
from PIL import Image
from sklearn.model_selection import train_test_split
import scipy.sparse as sparse
#--------------------------------------------------------------------------------------------------------------------------------
#Import our self-defined methods
import sys
sys.path.append(r"./Defined")
import Functions as F
# The following code should be added before the keras model
#np.random.seed(seed)
/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
/usr/local/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/usr/local/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/usr/local/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/usr/local/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/usr/local/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/usr/local/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
Using TensorFlow backend.
dataset_path='./Dataset/coil-20-proc/'
samples={}
# Walk the COIL-20 directory tree and group frames by object id.
# File names look like 'objNN__MM.png'; NN (the chars after 'obj') is the object id.
for dirpath, dirnames, filenames in os.walk(dataset_path):
    # Sort in place so traversal order (and therefore sample order) is deterministic.
    dirnames.sort()
    filenames.sort()
    # Keep only real .png frames; skip Jupyter '...checkpoint...' artifacts.
    for filename in [f for f in filenames if f.endswith(".png") and not f.find('checkpoint') > 0]:
        full_path = os.path.join(dirpath, filename)
        # 'objNN__MM.png' -> 'NN' (drop the 3-char 'obj' prefix, keep up to '__').
        file_identifier = filename.split('__')[0][3:]
        if file_identifier not in samples:
            samples[file_identifier] = []
        # Direct read alternative:
        #image = io.imread(full_path)
        # Downsample every frame to 20x20.
        # Image.ANTIALIAS was an alias of LANCZOS and is removed in Pillow >= 10;
        # LANCZOS is the forward-compatible spelling of the same filter.
        image_ = Image.open(full_path).resize((20, 20), Image.LANCZOS)
        samples[file_identifier].append(np.asarray(image_))
#plt.imshow(samples['1'][0].reshape(20,20))
# Flatten the per-object image lists into one (1440, 400) float array scaled to
# [0, 1], plus one integer label per image (COIL-20: 20 objects x 72 views = 1440).
data_arr_list=[]
label_arr_list=[]
for key_i in samples.keys():
    # Object ids are 1-based strings; labels are 0-based ints.
    key_i_for_label=[int(key_i)-1]
    data_arr_list.append(np.array(samples[key_i]))
    # 72 views per object -> repeat the label 72 times.
    label_arr_list.append(np.array(72*key_i_for_label))
data_arr=np.concatenate(data_arr_list).reshape(1440, 20*20).astype('float32') / 255.
# NOTE(review): despite the '_onehot' names, labels stay integer-encoded here
# (to_categorical is commented out) -- verify downstream consumers expect that.
label_arr_onehot=np.concatenate(label_arr_list)#to_categorical(np.concatenate(label_arr_list))
# 80/20 train/test split, then 90/10 train/validation, both seeded for reproducibility.
C_train_x,C_test_x,C_train_y,C_test_y= train_test_split(data_arr,label_arr_onehot,test_size=0.2,random_state=seed)
x_train,x_validate,y_train_onehot,y_validate_onehot= train_test_split(C_train_x,C_train_y,test_size=0.1,random_state=seed)
x_test=C_test_x
y_test_onehot=C_test_y
print('Shape of x_train: ' + str(x_train.shape))
print('Shape of x_validate: ' + str(x_validate.shape))
print('Shape of x_test: ' + str(x_test.shape))
print('Shape of y_train: ' + str(y_train_onehot.shape))
print('Shape of y_validate: ' + str(y_validate_onehot.shape))
print('Shape of y_test: ' + str(y_test_onehot.shape))
print('Shape of C_train_x: ' + str(C_train_x.shape))
print('Shape of C_train_y: ' + str(C_train_y.shape))
print('Shape of C_test_x: ' + str(C_test_x.shape))
print('Shape of C_test_y: ' + str(C_test_y.shape))
Shape of x_train: (1036, 400) Shape of x_validate: (116, 400) Shape of x_test: (288, 400) Shape of y_train: (1036,) Shape of y_validate: (116,) Shape of y_test: (288,) Shape of C_train_x: (1152, 400) Shape of C_train_y: (1152,) Shape of C_test_x: (288, 400) Shape of C_test_y: (288,)
# Number of input features (pixels) the selection layer should keep.
key_feture_number=50
# Re-seed NumPy immediately before model construction (see header note at top of file).
np.random.seed(seed)
#--------------------------------------------------------------------------------------------------------------------------------
class Feature_Select_Layer(Layer):
    """Trainable feature-weighting layer.

    Learns one weight per input feature and scales the input element-wise by
    the squared weights (squaring keeps the effective weights non-negative).
    With ``selection=True`` only the ``k`` largest squared weights are kept
    and the rest are zeroed, turning the layer into a hard feature selector.
    """

    def __init__(self, output_dim, **kwargs):
        super(Feature_Select_Layer, self).__init__(**kwargs)
        self.output_dim = output_dim

    def build(self, input_shape):
        # One scalar weight per input feature, initialised very close to 1 so
        # the layer starts out as (almost) the identity mapping.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1],),
                                      initializer=initializers.RandomUniform(minval=0.999999, maxval=0.9999999, seed=seed),
                                      trainable=True)
        super(Feature_Select_Layer, self).build(input_shape)

    def call(self, x, selection=False, k=key_feture_number):
        kernel = K.pow(self.kernel, 2)
        if selection:
            kernel_ = K.transpose(kernel)
            # Threshold = k-th largest squared weight; weights strictly below
            # it are zeroed (ties at the threshold are all kept).
            kth_largest = tf.math.top_k(kernel_, k=k)[0][-1]
            kernel = tf.where(condition=K.less(kernel, kth_largest), x=K.zeros_like(kernel), y=kernel)
        # Element-wise scaling expressed as multiplication by a diagonal matrix.
        return K.dot(x, tf.linalg.tensor_diag(kernel))

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
#--------------------------------------------------------------------------------------------------------------------------------
def Autoencoder(p_data_feature=x_train.shape[1],
                p_encoding_dim=key_feture_number,
                p_learning_rate=1E-3):
    """Build a plain linear autoencoder (no feature-selection layer).

    Args:
        p_data_feature: input dimensionality (number of features).
        p_encoding_dim: bottleneck width.
        p_learning_rate: Adam learning rate.

    Returns:
        (autoencoder, latent_encoder): the compiled full model and a model
        sharing its layers that outputs the bottleneck activations.
    """
    input_img = Input(shape=(p_data_feature,), name='input_img')
    encoded = Dense(p_encoding_dim, activation='linear', kernel_initializer=initializers.glorot_uniform(seed))(input_img)
    bottleneck = encoded
    decoded = Dense(p_data_feature, activation='linear', kernel_initializer=initializers.glorot_uniform(seed))(encoded)
    latent_encoder = Model(input_img, bottleneck)
    autoencoder = Model(input_img, decoded)
    autoencoder.compile(loss='mean_squared_error', optimizer=optimizers.Adam(lr=p_learning_rate))
    print('Autoencoder Structure-------------------------------------')
    autoencoder.summary()
    #print('Latent Encoder Structure-------------------------------------')
    #latent_encoder.summary()
    return autoencoder, latent_encoder
#--------------------------------------------------------------------------------------------------------------------------------
def Identity_Autoencoder(p_data_feature=x_train.shape[1],
                         p_encoding_dim=key_feture_number,
                         p_learning_rate=1E-3):
    """Build a linear autoencoder with a soft feature-weighting front end.

    Like :func:`Autoencoder` but the input first passes through
    Feature_Select_Layer in scoring mode (no hard selection), so feature
    weights are learned jointly with reconstruction.

    Args:
        p_data_feature: input dimensionality (number of features).
        p_encoding_dim: bottleneck width.
        p_learning_rate: Adam learning rate.

    Returns:
        (autoencoder, latent_encoder_score): the compiled full model and a
        model sharing its layers that outputs the bottleneck activations.
    """
    input_img = Input(shape=(p_data_feature,), name='autoencoder_input')
    # Soft (score) path only: every feature keeps its learned weight.
    feature_selection = Feature_Select_Layer(output_dim=p_data_feature,
                                             input_shape=(p_data_feature,),
                                             name='feature_selection')
    feature_selection_score = feature_selection(input_img)
    encoded = Dense(p_encoding_dim,
                    activation='linear',
                    kernel_initializer=initializers.glorot_uniform(seed),
                    name='autoencoder_hidden_layer')
    encoded_score = encoded(feature_selection_score)
    bottleneck_score = encoded_score
    decoded = Dense(p_data_feature,
                    activation='linear',
                    kernel_initializer=initializers.glorot_uniform(seed),
                    name='autoencoder_output')
    decoded_score = decoded(bottleneck_score)
    latent_encoder_score = Model(input_img, bottleneck_score)
    autoencoder = Model(input_img, decoded_score)
    autoencoder.compile(loss='mean_squared_error',
                        optimizer=optimizers.Adam(lr=p_learning_rate))
    print('Autoencoder Structure-------------------------------------')
    autoencoder.summary()
    return autoencoder, latent_encoder_score
#--------------------------------------------------------------------------------------------------------------------------------
def Fractal_Autoencoder(p_data_feature=x_train.shape[1],
                        p_feture_number=key_feture_number,
                        p_encoding_dim=key_feture_number,
                        p_learning_rate=1E-3,
                        p_loss_weight_1=1,
                        p_loss_weight_2=2):
    """Build the two-path feature-selection autoencoder.

    The same Feature_Select_Layer instance feeds two shared-weight paths:
    a soft 'score' path (all features weighted) and a hard 'choose' path
    (only the top ``p_feture_number`` features kept).  Both paths reconstruct
    the input and their MSE losses are combined with the given weights.

    Args:
        p_data_feature: input dimensionality (number of features).
        p_feture_number: number of features kept on the hard-selection path.
        p_encoding_dim: bottleneck width.
        p_learning_rate: Adam learning rate.
        p_loss_weight_1: weight of the soft-path reconstruction loss.
        p_loss_weight_2: weight of the hard-path reconstruction loss.

    Returns:
        (autoencoder, feature_selection_output, latent_encoder_score,
        latent_encoder_choose): the compiled two-output model plus models
        sharing its layers that expose the selected features and the two
        bottlenecks.
    """
    input_img = Input(shape=(p_data_feature,), name='autoencoder_input')
    feature_selection = Feature_Select_Layer(output_dim=p_data_feature,
                                             input_shape=(p_data_feature,),
                                             name='feature_selection')
    # Two calls to the SAME layer instance: shared weights, different modes.
    feature_selection_score = feature_selection(input_img)
    feature_selection_choose = feature_selection(input_img, selection=True, k=p_feture_number)
    encoded = Dense(p_encoding_dim,
                    activation='linear',
                    kernel_initializer=initializers.glorot_uniform(seed),
                    name='autoencoder_hidden_layer')
    encoded_score = encoded(feature_selection_score)
    encoded_choose = encoded(feature_selection_choose)
    bottleneck_score = encoded_score
    bottleneck_choose = encoded_choose
    decoded = Dense(p_data_feature,
                    activation='linear',
                    kernel_initializer=initializers.glorot_uniform(seed),
                    name='autoencoder_output')
    decoded_score = decoded(bottleneck_score)
    decoded_choose = decoded(bottleneck_choose)
    latent_encoder_score = Model(input_img, bottleneck_score)
    latent_encoder_choose = Model(input_img, bottleneck_choose)
    feature_selection_output = Model(input_img, feature_selection_choose)
    autoencoder = Model(input_img, [decoded_score, decoded_choose])
    autoencoder.compile(loss=['mean_squared_error', 'mean_squared_error'],
                        loss_weights=[p_loss_weight_1, p_loss_weight_2],
                        optimizer=optimizers.Adam(lr=p_learning_rate))
    print('Autoencoder Structure-------------------------------------')
    autoencoder.summary()
    return autoencoder, feature_selection_output, latent_encoder_score, latent_encoder_choose
epochs_number=200
batch_size_value=128
# Weight of the soft-path reconstruction loss; 0.0078125 = 1/128, so the hard
# (selected-features) path dominates training.
loss_weight_1=0.0078125
F_AE,\
feature_selection_output,\
latent_encoder_score_F_AE,\
latent_encoder_choose_F_AE=Fractal_Autoencoder(p_data_feature=x_train.shape[1],\
                                               p_feture_number=key_feture_number,\
                                               p_encoding_dim=key_feture_number,\
                                               p_learning_rate= 1E-3,\
                                               p_loss_weight_1=loss_weight_1,\
                                               p_loss_weight_2=1)
#file_name="./log/F_AE_"+str(key_feture_number)+".png"
#plot_model(F_AE, to_file=file_name,show_shapes=True)
WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:541: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.
WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:4432: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.
WARNING:tensorflow:From <ipython-input-6-478f6b761227>:22: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:66: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.
WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/keras/optimizers.py:793: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.
Autoencoder Structure-------------------------------------
Model: "model_4"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
autoencoder_input (InputLayer) (None, 400) 0
__________________________________________________________________________________________________
feature_selection (Feature_Sele (None, 400) 400 autoencoder_input[0][0]
autoencoder_input[0][0]
__________________________________________________________________________________________________
autoencoder_hidden_layer (Dense (None, 50) 20050 feature_selection[0][0]
feature_selection[1][0]
__________________________________________________________________________________________________
autoencoder_output (Dense) (None, 400) 20400 autoencoder_hidden_layer[0][0]
autoencoder_hidden_layer[1][0]
==================================================================================================
Total params: 40,850
Trainable params: 40,850
Non-trainable params: 0
__________________________________________________________________________________________________
# Snapshot weights every 100 epochs; filename encodes feature count, loss weight
# and epoch number.
model_checkpoint=ModelCheckpoint('./log_weights/F_AE_'+str(key_feture_number)+'_weights_'+str(loss_weight_1)+'.{epoch:04d}.hdf5',period=100,save_weights_only=True,verbose=1)
#print_weights = LambdaCallback(on_epoch_end=lambda batch, logs: print(F_AE.layers[1].get_weights()))
# Both model outputs (score path and choose path) reconstruct the same input,
# hence the duplicated [x_train, x_train] / [x_validate, x_validate] targets.
F_AE_history = F_AE.fit(x_train, [x_train,x_train],\
                        epochs=epochs_number,\
                        batch_size=batch_size_value,\
                        shuffle=True,\
                        validation_data=(x_validate, [x_validate,x_validate]),\
                        callbacks=[model_checkpoint])
WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:1033: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead. WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:1020: The name tf.assign is deprecated. Please use tf.compat.v1.assign instead. Train on 1036 samples, validate on 116 samples Epoch 1/200 1036/1036 [==============================] - 0s 410us/step - loss: 0.1816 - autoencoder_output_loss: 0.1798 - val_loss: 0.1439 - val_autoencoder_output_loss: 0.1428 Epoch 2/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.1355 - autoencoder_output_loss: 0.1345 - val_loss: 0.1023 - val_autoencoder_output_loss: 0.1015 Epoch 3/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0904 - autoencoder_output_loss: 0.0896 - val_loss: 0.0707 - val_autoencoder_output_loss: 0.0701 Epoch 4/200 1036/1036 [==============================] - 0s 66us/step - loss: 0.0655 - autoencoder_output_loss: 0.0649 - val_loss: 0.0594 - val_autoencoder_output_loss: 0.0589 Epoch 5/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0566 - autoencoder_output_loss: 0.0561 - val_loss: 0.0538 - val_autoencoder_output_loss: 0.0533 Epoch 6/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0518 - autoencoder_output_loss: 0.0513 - val_loss: 0.0508 - val_autoencoder_output_loss: 0.0504 Epoch 7/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0489 - autoencoder_output_loss: 0.0485 - val_loss: 0.0480 - val_autoencoder_output_loss: 0.0476 Epoch 8/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0462 - autoencoder_output_loss: 0.0458 - val_loss: 0.0454 - val_autoencoder_output_loss: 0.0450 Epoch 9/200 1036/1036 [==============================] - 0s 59us/step - loss: 0.0438 - autoencoder_output_loss: 0.0434 - val_loss: 0.0433 - val_autoencoder_output_loss: 0.0429 
Epoch 10/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0415 - autoencoder_output_loss: 0.0412 - val_loss: 0.0412 - val_autoencoder_output_loss: 0.0409 Epoch 11/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0396 - autoencoder_output_loss: 0.0393 - val_loss: 0.0392 - val_autoencoder_output_loss: 0.0389 Epoch 12/200 1036/1036 [==============================] - 0s 57us/step - loss: 0.0379 - autoencoder_output_loss: 0.0376 - val_loss: 0.0376 - val_autoencoder_output_loss: 0.0373 Epoch 13/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0364 - autoencoder_output_loss: 0.0361 - val_loss: 0.0363 - val_autoencoder_output_loss: 0.0360 Epoch 14/200 1036/1036 [==============================] - 0s 80us/step - loss: 0.0351 - autoencoder_output_loss: 0.0348 - val_loss: 0.0353 - val_autoencoder_output_loss: 0.0350 Epoch 15/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0339 - autoencoder_output_loss: 0.0336 - val_loss: 0.0343 - val_autoencoder_output_loss: 0.0340 Epoch 16/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0327 - autoencoder_output_loss: 0.0325 - val_loss: 0.0332 - val_autoencoder_output_loss: 0.0330 Epoch 17/200 1036/1036 [==============================] - 0s 59us/step - loss: 0.0317 - autoencoder_output_loss: 0.0314 - val_loss: 0.0324 - val_autoencoder_output_loss: 0.0321 Epoch 18/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0308 - autoencoder_output_loss: 0.0305 - val_loss: 0.0315 - val_autoencoder_output_loss: 0.0313 Epoch 19/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0300 - autoencoder_output_loss: 0.0297 - val_loss: 0.0310 - val_autoencoder_output_loss: 0.0307 Epoch 20/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0293 - autoencoder_output_loss: 0.0290 - val_loss: 0.0303 - val_autoencoder_output_loss: 0.0301 Epoch 21/200 1036/1036 
[==============================] - 0s 53us/step - loss: 0.0286 - autoencoder_output_loss: 0.0283 - val_loss: 0.0297 - val_autoencoder_output_loss: 0.0295 Epoch 22/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0280 - autoencoder_output_loss: 0.0278 - val_loss: 0.0292 - val_autoencoder_output_loss: 0.0290 Epoch 23/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0275 - autoencoder_output_loss: 0.0273 - val_loss: 0.0288 - val_autoencoder_output_loss: 0.0286 Epoch 24/200 1036/1036 [==============================] - 0s 66us/step - loss: 0.0270 - autoencoder_output_loss: 0.0268 - val_loss: 0.0284 - val_autoencoder_output_loss: 0.0282 Epoch 25/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0266 - autoencoder_output_loss: 0.0264 - val_loss: 0.0281 - val_autoencoder_output_loss: 0.0279 Epoch 26/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0262 - autoencoder_output_loss: 0.0260 - val_loss: 0.0276 - val_autoencoder_output_loss: 0.0274 Epoch 27/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0258 - autoencoder_output_loss: 0.0256 - val_loss: 0.0272 - val_autoencoder_output_loss: 0.0270 Epoch 28/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0254 - autoencoder_output_loss: 0.0252 - val_loss: 0.0269 - val_autoencoder_output_loss: 0.0267 Epoch 29/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0250 - autoencoder_output_loss: 0.0248 - val_loss: 0.0267 - val_autoencoder_output_loss: 0.0265 Epoch 30/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0247 - autoencoder_output_loss: 0.0245 - val_loss: 0.0263 - val_autoencoder_output_loss: 0.0261 Epoch 31/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0244 - autoencoder_output_loss: 0.0242 - val_loss: 0.0260 - val_autoencoder_output_loss: 0.0258 Epoch 32/200 1036/1036 [==============================] - 0s 58us/step - 
loss: 0.0241 - autoencoder_output_loss: 0.0239 - val_loss: 0.0257 - val_autoencoder_output_loss: 0.0255 Epoch 33/200 1036/1036 [==============================] - 0s 67us/step - loss: 0.0238 - autoencoder_output_loss: 0.0236 - val_loss: 0.0254 - val_autoencoder_output_loss: 0.0253 Epoch 34/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0235 - autoencoder_output_loss: 0.0233 - val_loss: 0.0252 - val_autoencoder_output_loss: 0.0250 Epoch 35/200 1036/1036 [==============================] - 0s 59us/step - loss: 0.0232 - autoencoder_output_loss: 0.0230 - val_loss: 0.0249 - val_autoencoder_output_loss: 0.0247 Epoch 36/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0231 - autoencoder_output_loss: 0.0229 - val_loss: 0.0247 - val_autoencoder_output_loss: 0.0245 Epoch 37/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0228 - autoencoder_output_loss: 0.0226 - val_loss: 0.0245 - val_autoencoder_output_loss: 0.0243 Epoch 38/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0226 - autoencoder_output_loss: 0.0224 - val_loss: 0.0244 - val_autoencoder_output_loss: 0.0242 Epoch 39/200 1036/1036 [==============================] - 0s 51us/step - loss: 0.0223 - autoencoder_output_loss: 0.0222 - val_loss: 0.0240 - val_autoencoder_output_loss: 0.0239 Epoch 40/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0220 - autoencoder_output_loss: 0.0219 - val_loss: 0.0239 - val_autoencoder_output_loss: 0.0237 Epoch 41/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0218 - autoencoder_output_loss: 0.0217 - val_loss: 0.0237 - val_autoencoder_output_loss: 0.0236 Epoch 42/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0216 - autoencoder_output_loss: 0.0215 - val_loss: 0.0236 - val_autoencoder_output_loss: 0.0234 Epoch 43/200 1036/1036 [==============================] - 0s 57us/step - loss: 0.0214 - autoencoder_output_loss: 0.0213 - 
val_loss: 0.0234 - val_autoencoder_output_loss: 0.0233 Epoch 44/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0213 - autoencoder_output_loss: 0.0211 - val_loss: 0.0232 - val_autoencoder_output_loss: 0.0231 Epoch 45/200 1036/1036 [==============================] - 0s 51us/step - loss: 0.0211 - autoencoder_output_loss: 0.0210 - val_loss: 0.0231 - val_autoencoder_output_loss: 0.0229 Epoch 46/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0209 - autoencoder_output_loss: 0.0208 - val_loss: 0.0229 - val_autoencoder_output_loss: 0.0228 Epoch 47/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0208 - autoencoder_output_loss: 0.0206 - val_loss: 0.0227 - val_autoencoder_output_loss: 0.0226 Epoch 48/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0206 - autoencoder_output_loss: 0.0205 - val_loss: 0.0226 - val_autoencoder_output_loss: 0.0224 Epoch 49/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0205 - autoencoder_output_loss: 0.0203 - val_loss: 0.0225 - val_autoencoder_output_loss: 0.0223 Epoch 50/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0204 - autoencoder_output_loss: 0.0202 - val_loss: 0.0223 - val_autoencoder_output_loss: 0.0222 Epoch 51/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0202 - autoencoder_output_loss: 0.0201 - val_loss: 0.0221 - val_autoencoder_output_loss: 0.0220 Epoch 52/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0201 - autoencoder_output_loss: 0.0199 - val_loss: 0.0220 - val_autoencoder_output_loss: 0.0218 Epoch 53/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0200 - autoencoder_output_loss: 0.0198 - val_loss: 0.0219 - val_autoencoder_output_loss: 0.0217 Epoch 54/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0199 - autoencoder_output_loss: 0.0197 - val_loss: 0.0218 - val_autoencoder_output_loss: 
0.0217 Epoch 55/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0198 - autoencoder_output_loss: 0.0196 - val_loss: 0.0217 - val_autoencoder_output_loss: 0.0216 Epoch 56/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0197 - autoencoder_output_loss: 0.0195 - val_loss: 0.0216 - val_autoencoder_output_loss: 0.0214 Epoch 57/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0196 - autoencoder_output_loss: 0.0194 - val_loss: 0.0215 - val_autoencoder_output_loss: 0.0214 Epoch 58/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0194 - autoencoder_output_loss: 0.0193 - val_loss: 0.0214 - val_autoencoder_output_loss: 0.0213 Epoch 59/200 1036/1036 [==============================] - 0s 69us/step - loss: 0.0193 - autoencoder_output_loss: 0.0192 - val_loss: 0.0213 - val_autoencoder_output_loss: 0.0211 Epoch 60/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0193 - autoencoder_output_loss: 0.0191 - val_loss: 0.0213 - val_autoencoder_output_loss: 0.0212 Epoch 61/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0191 - autoencoder_output_loss: 0.0190 - val_loss: 0.0211 - val_autoencoder_output_loss: 0.0210 Epoch 62/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0190 - autoencoder_output_loss: 0.0189 - val_loss: 0.0210 - val_autoencoder_output_loss: 0.0209 Epoch 63/200 1036/1036 [==============================] - 0s 61us/step - loss: 0.0189 - autoencoder_output_loss: 0.0188 - val_loss: 0.0209 - val_autoencoder_output_loss: 0.0208 Epoch 64/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0189 - autoencoder_output_loss: 0.0188 - val_loss: 0.0209 - val_autoencoder_output_loss: 0.0208 Epoch 65/200 1036/1036 [==============================] - 0s 65us/step - loss: 0.0188 - autoencoder_output_loss: 0.0186 - val_loss: 0.0208 - val_autoencoder_output_loss: 0.0206 Epoch 66/200 1036/1036 
[==============================] - 0s 58us/step - loss: 0.0187 - autoencoder_output_loss: 0.0186 - val_loss: 0.0207 - val_autoencoder_output_loss: 0.0206 Epoch 67/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0187 - autoencoder_output_loss: 0.0185 - val_loss: 0.0206 - val_autoencoder_output_loss: 0.0205 Epoch 68/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0186 - autoencoder_output_loss: 0.0184 - val_loss: 0.0206 - val_autoencoder_output_loss: 0.0205 Epoch 69/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0185 - autoencoder_output_loss: 0.0184 - val_loss: 0.0205 - val_autoencoder_output_loss: 0.0204 Epoch 70/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0184 - autoencoder_output_loss: 0.0183 - val_loss: 0.0205 - val_autoencoder_output_loss: 0.0203 Epoch 71/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0184 - autoencoder_output_loss: 0.0183 - val_loss: 0.0204 - val_autoencoder_output_loss: 0.0202 Epoch 72/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0183 - autoencoder_output_loss: 0.0182 - val_loss: 0.0203 - val_autoencoder_output_loss: 0.0202 Epoch 73/200 1036/1036 [==============================] - 0s 64us/step - loss: 0.0182 - autoencoder_output_loss: 0.0181 - val_loss: 0.0202 - val_autoencoder_output_loss: 0.0201 Epoch 74/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0182 - autoencoder_output_loss: 0.0181 - val_loss: 0.0201 - val_autoencoder_output_loss: 0.0200 Epoch 75/200 1036/1036 [==============================] - 0s 57us/step - loss: 0.0181 - autoencoder_output_loss: 0.0180 - val_loss: 0.0201 - val_autoencoder_output_loss: 0.0200 Epoch 76/200 1036/1036 [==============================] - 0s 64us/step - loss: 0.0181 - autoencoder_output_loss: 0.0179 - val_loss: 0.0201 - val_autoencoder_output_loss: 0.0199 Epoch 77/200 1036/1036 [==============================] - 0s 58us/step - 
loss: 0.0180 - autoencoder_output_loss: 0.0179 - val_loss: 0.0200 - val_autoencoder_output_loss: 0.0199 Epoch 78/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0179 - autoencoder_output_loss: 0.0178 - val_loss: 0.0200 - val_autoencoder_output_loss: 0.0198 Epoch 79/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0179 - autoencoder_output_loss: 0.0178 - val_loss: 0.0199 - val_autoencoder_output_loss: 0.0198 Epoch 80/200 1036/1036 [==============================] - 0s 60us/step - loss: 0.0178 - autoencoder_output_loss: 0.0177 - val_loss: 0.0199 - val_autoencoder_output_loss: 0.0198 Epoch 81/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0178 - autoencoder_output_loss: 0.0177 - val_loss: 0.0198 - val_autoencoder_output_loss: 0.0197 Epoch 82/200 1036/1036 [==============================] - 0s 66us/step - loss: 0.0178 - autoencoder_output_loss: 0.0177 - val_loss: 0.0198 - val_autoencoder_output_loss: 0.0197 Epoch 83/200 1036/1036 [==============================] - 0s 62us/step - loss: 0.0178 - autoencoder_output_loss: 0.0177 - val_loss: 0.0197 - val_autoencoder_output_loss: 0.0196 Epoch 84/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0177 - autoencoder_output_loss: 0.0175 - val_loss: 0.0197 - val_autoencoder_output_loss: 0.0196 Epoch 85/200 1036/1036 [==============================] - 0s 60us/step - loss: 0.0176 - autoencoder_output_loss: 0.0175 - val_loss: 0.0196 - val_autoencoder_output_loss: 0.0195 Epoch 86/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0175 - autoencoder_output_loss: 0.0174 - val_loss: 0.0200 - val_autoencoder_output_loss: 0.0198 Epoch 87/200 1036/1036 [==============================] - 0s 67us/step - loss: 0.0176 - autoencoder_output_loss: 0.0174 - val_loss: 0.0195 - val_autoencoder_output_loss: 0.0194 Epoch 88/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0174 - autoencoder_output_loss: 0.0173 - 
val_loss: 0.0195 - val_autoencoder_output_loss: 0.0193 Epoch 89/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0174 - autoencoder_output_loss: 0.0172 - val_loss: 0.0194 - val_autoencoder_output_loss: 0.0193 Epoch 90/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0173 - autoencoder_output_loss: 0.0172 - val_loss: 0.0194 - val_autoencoder_output_loss: 0.0193 Epoch 91/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0173 - autoencoder_output_loss: 0.0172 - val_loss: 0.0194 - val_autoencoder_output_loss: 0.0192 Epoch 92/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0172 - autoencoder_output_loss: 0.0171 - val_loss: 0.0193 - val_autoencoder_output_loss: 0.0192 Epoch 93/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0172 - autoencoder_output_loss: 0.0171 - val_loss: 0.0192 - val_autoencoder_output_loss: 0.0191 Epoch 94/200 1036/1036 [==============================] - 0s 63us/step - loss: 0.0172 - autoencoder_output_loss: 0.0171 - val_loss: 0.0194 - val_autoencoder_output_loss: 0.0192 Epoch 95/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0171 - autoencoder_output_loss: 0.0170 - val_loss: 0.0193 - val_autoencoder_output_loss: 0.0192 Epoch 96/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0171 - autoencoder_output_loss: 0.0170 - val_loss: 0.0193 - val_autoencoder_output_loss: 0.0192 Epoch 97/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0170 - autoencoder_output_loss: 0.0169 - val_loss: 0.0192 - val_autoencoder_output_loss: 0.0191 Epoch 98/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0170 - autoencoder_output_loss: 0.0169 - val_loss: 0.0191 - val_autoencoder_output_loss: 0.0190 Epoch 99/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0170 - autoencoder_output_loss: 0.0169 - val_loss: 0.0192 - val_autoencoder_output_loss: 
0.0190 Epoch 100/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0170 - autoencoder_output_loss: 0.0169 - val_loss: 0.0191 - val_autoencoder_output_loss: 0.0190 Epoch 00100: saving model to ./log_weights/F_AE_50_weights_0.0078125.0100.hdf5 Epoch 101/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0169 - autoencoder_output_loss: 0.0168 - val_loss: 0.0190 - val_autoencoder_output_loss: 0.0189 Epoch 102/200 1036/1036 [==============================] - 0s 51us/step - loss: 0.0169 - autoencoder_output_loss: 0.0167 - val_loss: 0.0190 - val_autoencoder_output_loss: 0.0189 Epoch 103/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0168 - autoencoder_output_loss: 0.0167 - val_loss: 0.0190 - val_autoencoder_output_loss: 0.0189 Epoch 104/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0168 - autoencoder_output_loss: 0.0167 - val_loss: 0.0189 - val_autoencoder_output_loss: 0.0188 Epoch 105/200 1036/1036 [==============================] - 0s 51us/step - loss: 0.0167 - autoencoder_output_loss: 0.0166 - val_loss: 0.0189 - val_autoencoder_output_loss: 0.0188 Epoch 106/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0167 - autoencoder_output_loss: 0.0166 - val_loss: 0.0189 - val_autoencoder_output_loss: 0.0188 Epoch 107/200 1036/1036 [==============================] - 0s 59us/step - loss: 0.0167 - autoencoder_output_loss: 0.0166 - val_loss: 0.0189 - val_autoencoder_output_loss: 0.0188 Epoch 108/200 1036/1036 [==============================] - 0s 65us/step - loss: 0.0167 - autoencoder_output_loss: 0.0166 - val_loss: 0.0188 - val_autoencoder_output_loss: 0.0187 Epoch 109/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0166 - autoencoder_output_loss: 0.0165 - val_loss: 0.0188 - val_autoencoder_output_loss: 0.0187 Epoch 110/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0167 - autoencoder_output_loss: 0.0166 - val_loss: 
0.0188 - val_autoencoder_output_loss: 0.0187 Epoch 111/200 1036/1036 [==============================] - 0s 64us/step - loss: 0.0165 - autoencoder_output_loss: 0.0164 - val_loss: 0.0188 - val_autoencoder_output_loss: 0.0187 Epoch 112/200 1036/1036 [==============================] - 0s 57us/step - loss: 0.0165 - autoencoder_output_loss: 0.0164 - val_loss: 0.0188 - val_autoencoder_output_loss: 0.0187 Epoch 113/200 1036/1036 [==============================] - 0s 70us/step - loss: 0.0165 - autoencoder_output_loss: 0.0164 - val_loss: 0.0188 - val_autoencoder_output_loss: 0.0187 Epoch 114/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0165 - autoencoder_output_loss: 0.0164 - val_loss: 0.0187 - val_autoencoder_output_loss: 0.0186 Epoch 115/200 1036/1036 [==============================] - 0s 57us/step - loss: 0.0164 - autoencoder_output_loss: 0.0163 - val_loss: 0.0187 - val_autoencoder_output_loss: 0.0186 Epoch 116/200 1036/1036 [==============================] - 0s 81us/step - loss: 0.0164 - autoencoder_output_loss: 0.0163 - val_loss: 0.0187 - val_autoencoder_output_loss: 0.0186 Epoch 117/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0164 - autoencoder_output_loss: 0.0163 - val_loss: 0.0187 - val_autoencoder_output_loss: 0.0186 Epoch 118/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0164 - autoencoder_output_loss: 0.0163 - val_loss: 0.0187 - val_autoencoder_output_loss: 0.0185 Epoch 119/200 1036/1036 [==============================] - 0s 51us/step - loss: 0.0163 - autoencoder_output_loss: 0.0162 - val_loss: 0.0185 - val_autoencoder_output_loss: 0.0184 Epoch 120/200 1036/1036 [==============================] - 0s 57us/step - loss: 0.0163 - autoencoder_output_loss: 0.0162 - val_loss: 0.0185 - val_autoencoder_output_loss: 0.0184 Epoch 121/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0164 - autoencoder_output_loss: 0.0163 - val_loss: 0.0185 - val_autoencoder_output_loss: 
0.0184 Epoch 122/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0164 - autoencoder_output_loss: 0.0163 - val_loss: 0.0186 - val_autoencoder_output_loss: 0.0185 Epoch 123/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0164 - autoencoder_output_loss: 0.0163 - val_loss: 0.0186 - val_autoencoder_output_loss: 0.0185 Epoch 124/200 1036/1036 [==============================] - 0s 64us/step - loss: 0.0164 - autoencoder_output_loss: 0.0163 - val_loss: 0.0185 - val_autoencoder_output_loss: 0.0184 Epoch 125/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0163 - autoencoder_output_loss: 0.0162 - val_loss: 0.0188 - val_autoencoder_output_loss: 0.0187 Epoch 126/200 1036/1036 [==============================] - 0s 62us/step - loss: 0.0163 - autoencoder_output_loss: 0.0162 - val_loss: 0.0185 - val_autoencoder_output_loss: 0.0184 Epoch 127/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0163 - autoencoder_output_loss: 0.0162 - val_loss: 0.0185 - val_autoencoder_output_loss: 0.0183 Epoch 128/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0164 - autoencoder_output_loss: 0.0163 - val_loss: 0.0187 - val_autoencoder_output_loss: 0.0186 Epoch 129/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0163 - autoencoder_output_loss: 0.0162 - val_loss: 0.0186 - val_autoencoder_output_loss: 0.0185 Epoch 130/200 1036/1036 [==============================] - 0s 57us/step - loss: 0.0162 - autoencoder_output_loss: 0.0161 - val_loss: 0.0186 - val_autoencoder_output_loss: 0.0185 Epoch 131/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0162 - autoencoder_output_loss: 0.0161 - val_loss: 0.0186 - val_autoencoder_output_loss: 0.0185 Epoch 132/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0161 - autoencoder_output_loss: 0.0160 - val_loss: 0.0186 - val_autoencoder_output_loss: 0.0185 Epoch 133/200 1036/1036 
[==============================] - 0s 56us/step - loss: 0.0161 - autoencoder_output_loss: 0.0160 - val_loss: 0.0185 - val_autoencoder_output_loss: 0.0184 Epoch 134/200 1036/1036 [==============================] - 0s 57us/step - loss: 0.0161 - autoencoder_output_loss: 0.0160 - val_loss: 0.0184 - val_autoencoder_output_loss: 0.0183 Epoch 135/200 1036/1036 [==============================] - 0s 59us/step - loss: 0.0161 - autoencoder_output_loss: 0.0160 - val_loss: 0.0184 - val_autoencoder_output_loss: 0.0183 Epoch 136/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0160 - autoencoder_output_loss: 0.0159 - val_loss: 0.0184 - val_autoencoder_output_loss: 0.0183 Epoch 137/200 1036/1036 [==============================] - 0s 65us/step - loss: 0.0160 - autoencoder_output_loss: 0.0159 - val_loss: 0.0184 - val_autoencoder_output_loss: 0.0183 Epoch 138/200 1036/1036 [==============================] - 0s 61us/step - loss: 0.0160 - autoencoder_output_loss: 0.0159 - val_loss: 0.0184 - val_autoencoder_output_loss: 0.0183 Epoch 139/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0160 - autoencoder_output_loss: 0.0159 - val_loss: 0.0184 - val_autoencoder_output_loss: 0.0183 Epoch 140/200 1036/1036 [==============================] - 0s 65us/step - loss: 0.0160 - autoencoder_output_loss: 0.0159 - val_loss: 0.0184 - val_autoencoder_output_loss: 0.0183 Epoch 141/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0159 - autoencoder_output_loss: 0.0158 - val_loss: 0.0183 - val_autoencoder_output_loss: 0.0182 Epoch 142/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0159 - autoencoder_output_loss: 0.0158 - val_loss: 0.0183 - val_autoencoder_output_loss: 0.0182 Epoch 143/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0159 - autoencoder_output_loss: 0.0158 - val_loss: 0.0183 - val_autoencoder_output_loss: 0.0182 Epoch 144/200 1036/1036 [==============================] - 0s 
57us/step - loss: 0.0159 - autoencoder_output_loss: 0.0158 - val_loss: 0.0183 - val_autoencoder_output_loss: 0.0182 Epoch 145/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0159 - autoencoder_output_loss: 0.0158 - val_loss: 0.0182 - val_autoencoder_output_loss: 0.0181 Epoch 146/200 1036/1036 [==============================] - 0s 88us/step - loss: 0.0159 - autoencoder_output_loss: 0.0158 - val_loss: 0.0182 - val_autoencoder_output_loss: 0.0181 Epoch 147/200 1036/1036 [==============================] - 0s 57us/step - loss: 0.0158 - autoencoder_output_loss: 0.0157 - val_loss: 0.0183 - val_autoencoder_output_loss: 0.0181 Epoch 148/200 1036/1036 [==============================] - 0s 71us/step - loss: 0.0158 - autoencoder_output_loss: 0.0157 - val_loss: 0.0182 - val_autoencoder_output_loss: 0.0181 Epoch 149/200 1036/1036 [==============================] - 0s 68us/step - loss: 0.0158 - autoencoder_output_loss: 0.0157 - val_loss: 0.0198 - val_autoencoder_output_loss: 0.0197 Epoch 150/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0161 - autoencoder_output_loss: 0.0160 - val_loss: 0.0196 - val_autoencoder_output_loss: 0.0195 Epoch 151/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0160 - autoencoder_output_loss: 0.0159 - val_loss: 0.0197 - val_autoencoder_output_loss: 0.0196 Epoch 152/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0165 - autoencoder_output_loss: 0.0164 - val_loss: 0.0186 - val_autoencoder_output_loss: 0.0185 Epoch 153/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0163 - autoencoder_output_loss: 0.0162 - val_loss: 0.0185 - val_autoencoder_output_loss: 0.0184 Epoch 154/200 1036/1036 [==============================] - 0s 69us/step - loss: 0.0162 - autoencoder_output_loss: 0.0161 - val_loss: 0.0190 - val_autoencoder_output_loss: 0.0189 Epoch 155/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0164 - 
autoencoder_output_loss: 0.0163 - val_loss: 0.0186 - val_autoencoder_output_loss: 0.0185 Epoch 156/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0160 - autoencoder_output_loss: 0.0160 - val_loss: 0.0183 - val_autoencoder_output_loss: 0.0182 Epoch 157/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0160 - autoencoder_output_loss: 0.0159 - val_loss: 0.0184 - val_autoencoder_output_loss: 0.0183 Epoch 158/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0159 - autoencoder_output_loss: 0.0158 - val_loss: 0.0183 - val_autoencoder_output_loss: 0.0182 Epoch 159/200 1036/1036 [==============================] - 0s 59us/step - loss: 0.0159 - autoencoder_output_loss: 0.0158 - val_loss: 0.0183 - val_autoencoder_output_loss: 0.0182 Epoch 160/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0160 - autoencoder_output_loss: 0.0159 - val_loss: 0.0184 - val_autoencoder_output_loss: 0.0183 Epoch 161/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0158 - autoencoder_output_loss: 0.0157 - val_loss: 0.0183 - val_autoencoder_output_loss: 0.0182 Epoch 162/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0158 - autoencoder_output_loss: 0.0157 - val_loss: 0.0183 - val_autoencoder_output_loss: 0.0182 Epoch 163/200 1036/1036 [==============================] - 0s 57us/step - loss: 0.0158 - autoencoder_output_loss: 0.0157 - val_loss: 0.0183 - val_autoencoder_output_loss: 0.0182 Epoch 164/200 1036/1036 [==============================] - 0s 66us/step - loss: 0.0158 - autoencoder_output_loss: 0.0157 - val_loss: 0.0182 - val_autoencoder_output_loss: 0.0181 Epoch 165/200 1036/1036 [==============================] - 0s 80us/step - loss: 0.0158 - autoencoder_output_loss: 0.0157 - val_loss: 0.0182 - val_autoencoder_output_loss: 0.0181 Epoch 166/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0157 - autoencoder_output_loss: 0.0156 - 
val_loss: 0.0182 - val_autoencoder_output_loss: 0.0181 Epoch 167/200 1036/1036 [==============================] - 0s 79us/step - loss: 0.0157 - autoencoder_output_loss: 0.0156 - val_loss: 0.0182 - val_autoencoder_output_loss: 0.0181 Epoch 168/200 1036/1036 [==============================] - 0s 67us/step - loss: 0.0157 - autoencoder_output_loss: 0.0156 - val_loss: 0.0182 - val_autoencoder_output_loss: 0.0181 Epoch 169/200 1036/1036 [==============================] - 0s 63us/step - loss: 0.0157 - autoencoder_output_loss: 0.0156 - val_loss: 0.0181 - val_autoencoder_output_loss: 0.0180 Epoch 170/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0157 - autoencoder_output_loss: 0.0156 - val_loss: 0.0181 - val_autoencoder_output_loss: 0.0180 Epoch 171/200 1036/1036 [==============================] - 0s 52us/step - loss: 0.0157 - autoencoder_output_loss: 0.0156 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 172/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0156 - autoencoder_output_loss: 0.0156 - val_loss: 0.0181 - val_autoencoder_output_loss: 0.0180 Epoch 173/200 1036/1036 [==============================] - 0s 53us/step - loss: 0.0156 - autoencoder_output_loss: 0.0155 - val_loss: 0.0181 - val_autoencoder_output_loss: 0.0180 Epoch 174/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0156 - autoencoder_output_loss: 0.0155 - val_loss: 0.0181 - val_autoencoder_output_loss: 0.0180 Epoch 175/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0157 - autoencoder_output_loss: 0.0156 - val_loss: 0.0181 - val_autoencoder_output_loss: 0.0180 Epoch 176/200 1036/1036 [==============================] - 0s 60us/step - loss: 0.0156 - autoencoder_output_loss: 0.0155 - val_loss: 0.0181 - val_autoencoder_output_loss: 0.0180 Epoch 177/200 1036/1036 [==============================] - 0s 59us/step - loss: 0.0156 - autoencoder_output_loss: 0.0155 - val_loss: 0.0181 - 
val_autoencoder_output_loss: 0.0180 Epoch 178/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0156 - autoencoder_output_loss: 0.0155 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 179/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0156 - autoencoder_output_loss: 0.0155 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 180/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0156 - autoencoder_output_loss: 0.0155 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 181/200 1036/1036 [==============================] - 0s 62us/step - loss: 0.0156 - autoencoder_output_loss: 0.0155 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 182/200 1036/1036 [==============================] - 0s 59us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 183/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 184/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 185/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 186/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 187/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0179 - val_autoencoder_output_loss: 0.0178 Epoch 188/200 1036/1036 [==============================] - 0s 70us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 
189/200 1036/1036 [==============================] - 0s 58us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0179 - val_autoencoder_output_loss: 0.0178 Epoch 190/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0179 - val_autoencoder_output_loss: 0.0178 Epoch 191/200 1036/1036 [==============================] - 0s 57us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 192/200 1036/1036 [==============================] - 0s 59us/step - loss: 0.0154 - autoencoder_output_loss: 0.0154 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 193/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0179 - val_autoencoder_output_loss: 0.0178 Epoch 194/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0179 - val_autoencoder_output_loss: 0.0178 Epoch 195/200 1036/1036 [==============================] - 0s 55us/step - loss: 0.0155 - autoencoder_output_loss: 0.0154 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 196/200 1036/1036 [==============================] - 0s 65us/step - loss: 0.0154 - autoencoder_output_loss: 0.0154 - val_loss: 0.0180 - val_autoencoder_output_loss: 0.0179 Epoch 197/200 1036/1036 [==============================] - 0s 56us/step - loss: 0.0154 - autoencoder_output_loss: 0.0154 - val_loss: 0.0179 - val_autoencoder_output_loss: 0.0178 Epoch 198/200 1036/1036 [==============================] - 0s 54us/step - loss: 0.0154 - autoencoder_output_loss: 0.0154 - val_loss: 0.0179 - val_autoencoder_output_loss: 0.0178 Epoch 199/200 1036/1036 [==============================] - 0s 51us/step - loss: 0.0154 - autoencoder_output_loss: 0.0153 - val_loss: 0.0179 - val_autoencoder_output_loss: 0.0178 Epoch 200/200 1036/1036 
[==============================] - 0s 52us/step - loss: 0.0154 - autoencoder_output_loss: 0.0153 - val_loss: 0.0178 - val_autoencoder_output_loss: 0.0177 Epoch 00200: saving model to ./log_weights/F_AE_50_weights_0.0078125.0200.hdf5
# Training/validation loss curves for the feature-selection autoencoder,
# then reconstruction MSE of both model outputs on the test set.
loss = F_AE_history.history['loss']
val_loss = F_AE_history.history['val_loss']
epochs = range(epochs_number)

def _plot_loss_curves(xs, train_vals, val_vals):
    # Training loss as blue dots, validation loss as a red line.
    plt.plot(xs, train_vals, 'bo', label='Training Loss')
    plt.plot(xs, val_vals, 'r', label='Validation Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()

_plot_loss_curves(epochs, loss, val_loss)
# NOTE(review): the log above shows 200 epochs; if epochs_number <= 250 this
# zoomed view plots an empty range — confirm epochs_number for this run.
_plot_loss_curves(epochs[250:], loss[250:], val_loss[250:])

# p_data[0]: one-to-one map output; p_data[1]: feature-selection output.
p_data = F_AE.predict(x_test)
numbers = x_test.shape[0] * x_test.shape[1]
print("MSE for one-to-one map layer", np.sum(np.power(np.array(p_data)[0] - x_test, 2)) / numbers)
print("MSE for feature selection layer", np.sum(np.power(np.array(p_data)[1] - x_test, 2)) / numbers)
MSE for one-to-one map layer 0.012284795973036024 MSE for feature selection layer 0.016594519085354274
# Show the first 160 reconstructions from each of the two model outputs,
# then the feature-selection layer's raw activations on the test set.
for reconstruction in (p_data[0], p_data[1]):
    F.show_data_figures(reconstruction[0:160], w=20, h=20, columns=20)
FS_layer_output = feature_selection_output.predict(x_test)
F.show_data_figures(FS_layer_output[0:160], w=20, h=20, columns=20)
# Count of non-zero activations kept for the first test sample.
print(np.sum(FS_layer_output[0] > 0))
27
# For each of the first 160 FS-layer outputs, collect the (row, col) grid
# positions of its non-zero activations, then overlay them on the figures.
key_feature_catch_index = []
for fs_sample in FS_layer_output[0:160]:
    grid = fs_sample.reshape(20, 20)
    rows, cols = np.where(grid > 0)
    # np.where returns row-major-ordered coordinates; pair them up.
    key_feature_catch_index.append([[r, c] for r, c in zip(rows, cols)])
F.show_data_figures_with_hierarchy_keyfeature(FS_layer_output[0:160], key_feature_catch_index, w=20, h=20, columns=20)
F.show_data_figures_with_hierarchy_keyfeature(x_test[0:160], key_feature_catch_index, w=20, h=20, columns=20)
# Gray
# Alternative (disabled) grayscale rendering of the selected feature mask:
'''
key_features=F.top_k_keepWeights_1(F_AE.get_layer(index=1).get_weights()[0],key_feture_number)
plt.axis('off')
plt.imshow(key_features.reshape((20, 20)),plt.cm.gray)
print(np.sum(F_AE.get_layer(index=1).get_weights()[0]>0))
'''
# Red
# Keep only the top-k weights of layer 1 as a 0/1 mask and draw each selected
# position as a red square on a black 20x20 canvas.
key_features = F.top_k_keepWeights_1(F_AE.get_layer(index=1).get_weights()[0], key_feture_number)
key_features_ = key_features.reshape((20, 20))
plt.axis('off')
canvas = np.zeros((20, 20))
plt.imshow(canvas, plt.cm.gray)
# np.where yields coordinates in the same row-major order the original
# nested index loops visited.
for row_idx, col_idx in zip(*np.where(key_features_ == 1)):
    plt.scatter(col_idx, row_idx, s=100, color='r', marker="s")
plt.tight_layout()
plt.show()
# Non-zero weights before masking vs. entries kept by the top-k mask.
print(np.sum(F_AE.get_layer(index=1).get_weights()[0] > 0))
print(np.sum(key_features > 0))
400 50
# Baseline: ExtraTrees classification accuracy on the raw, unselected features.
train_feature, train_label = C_train_x, C_train_y
test_feature, test_label = C_test_x, C_test_y
p_seed = seed
F.ETree(train_feature, train_label, test_feature, test_label, p_seed)
Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 1.0 Testing accuracy: 1.0
F.show_data_figures(C_test_x[0:160],w=20,h=20,columns = 20)
print(np.sum(FS_layer_output[0]>0))
27
# Record the (row, col) grid coordinates of the selected key features.
row, column = np.where(key_features_ > 0)
selected_position_list = [(row[i], column[i]) for i in range(key_feture_number)]
# a) ExtraTrees on the raw FS-layer activations (400 dims, mostly zeros).
train_feature = feature_selection_output.predict(C_train_x)
test_feature = feature_selection_output.predict(C_test_x)
train_label, test_label = C_train_y, C_test_y
p_seed = seed
print("train_feature>0: ", np.sum(train_feature[0] > 0))
print(train_feature.shape)
print("test_feature>0: ", np.sum(test_feature[0] > 0))
print(test_feature.shape)
F.ETree(train_feature, train_label, test_feature, test_label, p_seed)
train_feature>0: 36 (1152, 400) test_feature>0: 27 (288, 400) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 0.9583333333333334 Testing accuracy: 0.9583333333333334
F.show_data_figures(test_feature[0:160],w=20,h=20,columns = 20)
b) Sparse matrix
# b) Same FS-layer features, packed into SciPy COO sparse matrices.
train_feature = feature_selection_output.predict(C_train_x)
test_feature = feature_selection_output.predict(C_test_x)
print(train_feature.shape)
print(test_feature.shape)
train_label, test_label = C_train_y, C_test_y
train_feature_sparse = sparse.coo_matrix(train_feature)
test_feature_sparse = sparse.coo_matrix(test_feature)
p_seed = seed
F.ETree(train_feature_sparse, train_label, test_feature_sparse, test_label, p_seed)
(1152, 400) (288, 400) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 0.9583333333333334 Testing accuracy: 0.9583333333333334
c) Compression
# c) Compress away the zero columns, keeping only the selected features.
train_feature_ = feature_selection_output.predict(C_train_x)
test_feature_ = feature_selection_output.predict(C_test_x)
train_feature = F.compress_zero(train_feature_, key_feture_number)
test_feature = F.compress_zero(test_feature_, key_feture_number)
print(train_feature.shape)
print(test_feature.shape)
train_label, test_label = C_train_y, C_test_y
p_seed = seed
F.ETree(train_feature, train_label, test_feature, test_label, p_seed)
(1152, 50) (288, 50) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 0.8402777777777778 Testing accuracy: 0.8402777777777778
d) Compression with structure
# d) Compression that preserves the spatial structure of the selected positions.
train_feature_ = feature_selection_output.predict(C_train_x)
test_feature_ = feature_selection_output.predict(C_test_x)
train_feature = F.compress_zero_withkeystructure(train_feature_, selected_position_list, w=20, h=20)
test_feature = F.compress_zero_withkeystructure(test_feature_, selected_position_list, w=20, h=20)
print(train_feature.shape)
print(test_feature.shape)
train_label, test_label = C_train_y, C_test_y
p_seed = seed
F.ETree(train_feature, train_label, test_feature, test_label, p_seed)
(1152, 50) (288, 50) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 0.9791666666666666 Testing accuracy: 0.9791666666666666
# a) Mask the raw inputs with the learned 0/1 key-feature map.
train_feature = np.multiply(C_train_x, key_features)
test_feature = np.multiply(C_test_x, key_features)
train_label, test_label = C_train_y, C_test_y
p_seed = seed
print("train_feature>0: ", np.sum(train_feature[0] > 0))
print(train_feature.shape)
print("test_feature>0: ", np.sum(test_feature[0] > 0))
print(test_feature.shape)
F.ETree(train_feature, train_label, test_feature, test_label, p_seed)
train_feature>0: 36 (1152, 400) test_feature>0: 27 (288, 400) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 0.9583333333333334 Testing accuracy: 0.9583333333333334
F.show_data_figures(test_feature[0:160],w=20,h=20,columns = 20)
b) Sparse matrix
# b) Masked inputs, packed into SciPy COO sparse matrices.
train_feature = np.multiply(C_train_x, key_features)
test_feature = np.multiply(C_test_x, key_features)
print(train_feature.shape)
print(test_feature.shape)
train_label, test_label = C_train_y, C_test_y
train_feature_sparse = sparse.coo_matrix(train_feature)
test_feature_sparse = sparse.coo_matrix(test_feature)
p_seed = seed
F.ETree(train_feature_sparse, train_label, test_feature_sparse, test_label, p_seed)
(1152, 400) (288, 400) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 0.9583333333333334 Testing accuracy: 0.9583333333333334
c) Compression
# c) Masked inputs with the zero columns compressed away.
train_feature_ = np.multiply(C_train_x, key_features)
test_feature_ = np.multiply(C_test_x, key_features)
train_feature = F.compress_zero(train_feature_, key_feture_number)
test_feature = F.compress_zero(test_feature_, key_feture_number)
print(train_feature.shape)
print(test_feature.shape)
train_label, test_label = C_train_y, C_test_y
p_seed = seed
F.ETree(train_feature, train_label, test_feature, test_label, p_seed)
(1152, 50) (288, 50) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 0.8472222222222222 Testing accuracy: 0.8472222222222222
d) Compression with structure
# d) Masked inputs, structure-preserving compression of the selected positions.
train_feature_ = np.multiply(C_train_x, key_features)
test_feature_ = np.multiply(C_test_x, key_features)
train_feature = F.compress_zero_withkeystructure(train_feature_, selected_position_list, w=20, h=20)
test_feature = F.compress_zero_withkeystructure(test_feature_, selected_position_list, w=20, h=20)
print(train_feature.shape)
print(test_feature.shape)
train_label, test_label = C_train_y, C_test_y
p_seed = seed
F.ETree(train_feature, train_label, test_feature, test_label, p_seed)
(1152, 50) (288, 50) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 0.9791666666666666 Testing accuracy: 0.9791666666666666
# ExtraTrees on the 50-dim latent code from the score-based encoder.
train_feature = latent_encoder_score_F_AE.predict(C_train_x)
test_feature = latent_encoder_score_F_AE.predict(C_test_x)
print(train_feature.shape)
print(test_feature.shape)
train_label, test_label = C_train_y, C_test_y
p_seed = seed
F.ETree(train_feature, train_label, test_feature, test_label, p_seed)
(1152, 50) (288, 50) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 0.9756944444444444 Testing accuracy: 0.9756944444444444
# ExtraTrees on the 50-dim latent code from the choose-based encoder.
train_feature = latent_encoder_choose_F_AE.predict(C_train_x)
test_feature = latent_encoder_choose_F_AE.predict(C_test_x)
print(train_feature.shape)
print(test_feature.shape)
train_label, test_label = C_train_y, C_test_y
p_seed = seed
F.ETree(train_feature, train_label, test_feature, test_label, p_seed)
(1152, 50) (288, 50) Training accuracy: 1.0 Training accuracy: 1.0 Testing accuracy: 0.9756944444444444 Testing accuracy: 0.9756944444444444
# Re-collect the non-zero (row, col) positions per FS-layer output and
# overlay them on the original test images.
key_feature_catch_index = []
for fs_sample in FS_layer_output[0:160]:
    grid = fs_sample.reshape(20, 20)
    rows, cols = np.where(grid > 0)
    # Coordinates come back in row-major order, matching an index double-loop.
    key_feature_catch_index.append([[r, c] for r, c in zip(rows, cols)])
F.show_data_figures_with_hierarchy_keyfeature(x_test[0:160], key_feature_catch_index, w=20, h=20, columns=20)
from sklearn.linear_model import LinearRegression

def mse_check(train, test):
    """Fit a linear map from train[0] to train[1]; return its MSE on test.

    train/test are (features, targets) pairs; the regression predicts the
    targets from the features and the mean squared error is computed on
    the held-out test pair.
    """
    model = LinearRegression(n_jobs=-1)
    model.fit(train[0], train[1])
    residual = model.predict(test[0]) - test[1]
    return (residual ** 2).mean()
# Linear reconstruction check: predict the full input from only the
# compressed selected features, and report the resulting MSE.
train_feature_ = np.multiply(C_train_x, key_features)
test_feature_ = np.multiply(C_test_x, key_features)
# NOTE(review): unlike the earlier calls, w=20/h=20 are not passed here —
# confirm the function's defaults match before relying on these results.
C_train_selected_x = F.compress_zero_withkeystructure(train_feature_, selected_position_list)
C_test_selected_x = F.compress_zero_withkeystructure(test_feature_, selected_position_list)
print(C_train_selected_x.shape)
print(C_test_selected_x.shape)
train_feature_tuple = (C_train_selected_x, C_train_x)
test_feature_tuple = (C_test_selected_x, C_test_x)
reconstruction_loss = mse_check(train_feature_tuple, test_feature_tuple)
print(reconstruction_loss)
(1152, 50) (288, 50) 0.0161143192169816